import config as cfg
import requests
import cv2
from PIL import Image
import io
import base64
import matplotlib.pyplot as plt
import pprint
# Pretty-printer used to display every API response in this walkthrough.
pp = pprint.PrettyPrinter(indent=4)
# Root URL of the production REST API (version 1).
BASE_URL = "https://kcmj21es3e.execute-api.us-east-1.amazonaws.com/prod/v1"
def encode_image(image, image_format="JPEG"):
    """Serialize a PIL image to a base64-encoded string for the API payload.

    Args:
        image: A PIL.Image.Image (anything exposing ``save(buffer, format=...)``).
        image_format: Encoding format understood by PIL (default ``"JPEG"``).

    Returns:
        str: Base64 (ASCII) representation of the encoded image bytes.
    """
    buffered = io.BytesIO()
    image.save(buffered, format=image_format)
    # b64encode returns bytes; decode to str so it is JSON-serializable.
    return base64.b64encode(buffered.getvalue()).decode()
Authentication¶
# Exchange the credentials stored in config.py for a short-lived bearer token.
response = requests.post(
    BASE_URL + "/auth/token",
    data={
        "username": cfg.USERNAME,
        "password": cfg.PASSWORD,
    },
)
# Fail fast with a descriptive HTTPError on bad credentials instead of an
# opaque KeyError when "access_token" is missing from the response body.
response.raise_for_status()
token = response.json()["access_token"]
# Every subsequent request authenticates with this header.
headers = {"Authorization": "Bearer %s" % token}
Panoramics¶
# The image is converted to grayscale in the backend, so you can convert it before sending in order to save bandwidth.
# "L" is PIL's 8-bit single-channel (grayscale) mode.
panoramic_image = Image.open("images/panoramic0.jpg").convert("L")
Dentition Endpoint¶
Classifies the panoramic radiograph as one of: toothless, superior toothless, inferior toothless, mixed, or "normal".
# Ask the dentition classifier to label the panoramic radiograph.
dentition_response = requests.post(
    f"{BASE_URL}/panoramics/dentition",
    json={"base64_image": encode_image(panoramic_image)},
    headers=headers,
)
# Show only the four highest-ranked classes.
pdata = dentition_response.json()
pdata["entities"] = pdata["entities"][:4]
pp.pprint(pdata)
{ 'entities': [ { 'bbox': None,
'class_name': 'normal',
'contour': None,
'line': None,
'point': None,
'score': 1.0,
'tooth': None,
'type': 'dentition'},
{ 'bbox': None,
'class_name': 'superior_toothless',
'contour': None,
'line': None,
'point': None,
'score': 7.882013930426979e-10,
'tooth': None,
'type': 'dentition'},
{ 'bbox': None,
'class_name': 'mixed',
'contour': None,
'line': None,
'point': None,
'score': 2.5856181778971532e-11,
'tooth': None,
'type': 'dentition'},
{ 'bbox': None,
'class_name': 'inferior_toothless',
'contour': None,
'line': None,
'point': None,
'score': 1.471757177934352e-12,
'tooth': None,
'type': 'dentition'}],
'height': 1292,
'image_hash': None,
'width': 2444}
# Display the grayscale panoramic for visual reference.
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(panoramic_image, cmap="gray")
ax.axis("off")
Longaxis Endpoint¶
Returns point coordinates for the crown and the root of every tooth in the panoramic image.
# Request crown/root long-axis points for every tooth in the panoramic.
longaxis_response = requests.post(
    f"{BASE_URL}/panoramics/longaxis",
    json={"base64_image": encode_image(panoramic_image)},
    headers=headers,
)
Output Sample¶
# Preview the first three detected long-axis points.
pdata = longaxis_response.json()
pdata["entities"] = pdata["entities"][:3]
pp.pprint(pdata)
{ 'entities': [ { 'bbox': None,
'class_name': '18_0',
'contour': None,
'line': None,
'point': [632.8214285714287, 553.7142857142857],
'score': 0.5159812122583389,
'tooth': None,
'type': 'longaxis'},
{ 'bbox': None,
'class_name': '18_1',
'contour': None,
'line': None,
'point': [611.0, 351.83928571428567],
'score': 0.5159812122583389,
'tooth': None,
'type': 'longaxis'},
{ 'bbox': None,
'class_name': '17_0',
'contour': None,
'line': None,
'point': [752.8392857142858, 594.0892857142857],
'score': 0.594242125749588,
'tooth': None,
'type': 'longaxis'}],
'height': 1292,
'image_hash': None,
'width': 2444}
Visualization¶
from vis import draw_longaxis_output

# Overlay crown/root points (with the connecting axis) on the panoramic.
dimage = draw_longaxis_output(
    panoramic_image, longaxis_response.json()["entities"], draw_axis=True
)
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(dimage)
ax.axis("off")
Panorogram Endpoint¶
Returns the panorogram curves of the panoramic image.
# Request the panorogram curves of the panoramic image.
panorogram_response = requests.post(
    f"{BASE_URL}/panoramics/panorogram",
    json={"base64_image": encode_image(panoramic_image)},
    headers=headers,
)
Output Sample¶
pdata = panorogram_response.json()
# Keep two curves and truncate each contour to its first three vertices.
pdata["entities"] = [
    {**entity, "contour": entity["contour"][:3]} for entity in pdata["entities"][:2]
]
pp.pprint(pdata)
{ 'entities': [ { 'bbox': None,
'class_name': 'ContMand',
'contour': [[150, 179], [149, 180], [148, 180]],
'line': None,
'point': None,
'score': None,
'tooth': None,
'type': 'panorogram'},
{ 'bbox': None,
'class_name': 'CanManDir',
'contour': [[386, 547], [385, 548], [384, 548]],
'line': None,
'point': None,
'score': None,
'tooth': None,
'type': 'panorogram'}],
'height': 1292,
'image_hash': None,
'width': 2444}
Visualization¶
from vis import draw_panorogram

# Pair each curve name with its contour and draw them over the panoramic.
curves = [
    [entity["class_name"], entity["contour"]]
    for entity in panorogram_response.json()["entities"]
]
dimage = draw_panorogram(panoramic_image, curves)
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(dimage)
ax.axis("off")
Metals Endpoint¶
Returns metal structures found on the panoramic image.
# Request metal structures detected on the panoramic image.
metals_response = requests.post(
    f"{BASE_URL}/panoramics/metals",
    json={"base64_image": encode_image(panoramic_image)},
    headers=headers,
)
Sample Output¶
pp.pprint(metals_response.json())
{ 'entities': [ { 'bbox': [ 860.0716552734375,
624.821044921875,
1569.6455078125,
783.9153442382812],
'class_name': 'ortodontia',
'contour': None,
'line': None,
'point': None,
'score': 0.9565339088439941,
'tooth': None,
'type': 'metals'},
{ 'bbox': [ 800.822021484375,
469.63043212890625,
1654.4962158203125,
637.3797607421875],
'class_name': 'ortodontia',
'contour': None,
'line': None,
'point': None,
'score': 0.831466555595398,
'tooth': None,
'type': 'metals'}],
'height': 1292,
'image_hash': None,
'width': 2444}
Visualization¶
from vis import draw_bboxes

# Draw the detected metal bounding boxes over the panoramic.
dimage = draw_bboxes(panoramic_image, metals_response.json()["entities"])
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(dimage)
ax.axis("off")
Teeth Segmentation Endpoint¶
Return a segmentation mask in the form of a contour for each found tooth on the panoramic image.
# Request a segmentation contour for each tooth found on the panoramic.
teeth_segmentation_response = requests.post(
    f"{BASE_URL}/panoramics/teeth-segmentation",
    json={"base64_image": encode_image(panoramic_image)},
    headers=headers,
)
Output Sample¶
teeth_segmentation_response
<Response [200]>
pdata = teeth_segmentation_response.json()
# Keep three teeth and truncate each contour to its first three vertices.
pdata["entities"] = [
    {**entity, "contour": entity["contour"][:3]} for entity in pdata["entities"][:3]
]
pp.pprint(pdata)
{ 'entities': [ { 'bbox': None,
'class_name': '18',
'contour': [[608, 355], [607, 356], [606, 356]],
'line': None,
'point': None,
'score': None,
'tooth': None,
'type': 'teeth_segmentation'},
{ 'bbox': None,
'class_name': '17',
'contour': [[701, 370], [700, 371], [699, 371]],
'line': None,
'point': None,
'score': None,
'tooth': None,
'type': 'teeth_segmentation'},
{ 'bbox': None,
'class_name': '16',
'contour': [[828, 362], [827, 363], [826, 363]],
'line': None,
'point': None,
'score': None,
'tooth': None,
'type': 'teeth_segmentation'}],
'height': 1292,
'image_hash': None,
'width': 2444}
Visualization¶
from vis import contour2mask, draw_masks

# Rasterize every tooth contour into a mask and overlay the masks.
# NOTE(review): panoramic_image.size is (width, height) in PIL — confirm
# contour2mask expects that argument order.
contours_list = [[entity["contour"]] for entity in teeth_segmentation_response.json()["entities"]]
masks = [contour2mask(tooth_contour, *panoramic_image.size) for tooth_contour in contours_list]
dimage = draw_masks(panoramic_image, masks)
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(dimage)
ax.axis("off")
Procedures Endpoint¶
Returns dental procedures found on the panoramic image.
# Request the dental procedures detected on the panoramic image.
procedures_response = requests.post(
    f"{BASE_URL}/panoramics/procedures",
    json={"base64_image": encode_image(panoramic_image)},
    headers=headers,
)
Output Sample¶
# Preview just the top detected procedure.
pdata = procedures_response.json()
pdata["entities"] = pdata["entities"][:1]
pp.pprint(pdata)
{ 'entities': [ { 'bbox': None,
'class_name': 'MatRestCoron',
'contour': None,
'line': [ [632.8214285714287, 553.7142857142857],
[611.0, 351.83928571428567]],
'point': None,
'score': 0.6624748706817627,
'tooth': '18',
'type': 'procedure'}],
'height': 1292,
'image_hash': None,
'width': 2444}
Visualization¶
from vis import draw_procedures_output

# Draw only the confidently detected procedures (score above 0.5).
confident = [e for e in procedures_response.json()["entities"] if e["score"] > 0.5]
dimage = draw_procedures_output(panoramic_image, confident)
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(dimage)
ax.axis("off")
Describe Endpoint¶
The describe endpoint will return the result of all models at once.
The models can be called individually from the endpoints described in the docs.
# The describe endpoint aggregates the output of all models in one call.
describe_response = requests.post(
    f"{BASE_URL}/panoramics/describe",
    json={"base64_image": encode_image(panoramic_image)},
    headers=headers,
)
Output Sample¶
# Preview the first three aggregated entities.
pdata = describe_response.json()
pdata["entities"] = pdata["entities"][:3]
pp.pprint(pdata)
{ 'entities': [ { 'bbox': None,
'class_name': 'normal',
'contour': None,
'line': None,
'point': None,
'score': 1.0,
'tooth': None,
'type': 'dentition'},
{ 'bbox': None,
'class_name': 'superior_toothless',
'contour': None,
'line': None,
'point': None,
'score': 7.882013930426979e-10,
'tooth': None,
'type': 'dentition'},
{ 'bbox': None,
'class_name': 'mixed',
'contour': None,
'line': None,
'point': None,
'score': 2.5856181778971532e-11,
'tooth': None,
'type': 'dentition'}],
'height': 1292,
'image_hash': None,
'width': 2444}
Anomalies Heatmap Endpoint¶
Returns a heatmap of the anomalies found on the panoramic image.
# Request the per-tooth anomaly heatmap of the panoramic image.
anomalies_response = requests.post(
    f"{BASE_URL}/panoramics/teeth-anomalies-heatmap",
    json={"base64_image": encode_image(panoramic_image)},
    headers=headers,
)
# Print the anomaly class names found on the first eight teeth.
for idx, data in enumerate(anomalies_response.json()):
    if idx > 7:
        break
    if len(data["anomalies"]) == 0:
        continue
    names = ", ".join(anno["anomaly_name"] for anno in data["anomalies"])
    print(f"Tooth {data['tooth_name']} Anomalies -{names}", end=" ")
print("\n")
Tooth 18 Anomalies -NodP Tooth 17 Anomalies -Calc, NodP, Car Tooth 16 Anomalies -Calc, NodP, Car Tooth 13 Anomalies -Calc Tooth 12 Anomalies -Calc, Car Tooth 11 Anomalies -Calc, Car Tooth 21 Anomalies -Calc, Car
Visualization¶
Visualizing the Calc class.
from utils import process_ret_api
from vis import draw_heatmap

# Build and overlay the heatmap for the "Calc" anomaly class only.
heatmap = process_ret_api(
    anomalies_response.json(),
    h=panoramic_image.height,
    w=panoramic_image.width,
    anomaly2see="Calc",
)
dimage = draw_heatmap(panoramic_image, heatmap)
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(dimage)
ax.axis("off")
Periapicals¶
# The image is converted to grayscale in the backend, so you can convert it before sending in order to save bandwidth.
# "L" is PIL's 8-bit single-channel (grayscale) mode.
periapical_image = Image.open("images/periapical0.jpg").convert("L")
Classify Endpoint¶
The classify endpoint will return the periapical type among the 14 possible regions.
# Classify the periapical image into one of the 14 possible regions.
classify_response = requests.post(
    f"{BASE_URL}/periapicals/classify",
    json={"base64_image": encode_image(periapical_image)},
    headers=headers,
)
Output Sample¶
# Preview the four highest-ranked region classes.
pdata = classify_response.json()
pdata["entities"] = pdata["entities"][:4]
pp.pprint(pdata)
{ 'entities': [ { 'bbox': None,
'class_name': '42-41-31-32',
'contour': None,
'line': None,
'point': None,
'score': 0.9996941089630127,
'tooth': None,
'type': 'periapical_classification'},
{ 'bbox': None,
'class_name': '33',
'contour': None,
'line': None,
'point': None,
'score': 0.0002522027643863112,
'tooth': None,
'type': 'periapical_classification'},
{ 'bbox': None,
'class_name': '43',
'contour': None,
'line': None,
'point': None,
'score': 3.3788826840464026e-05,
'tooth': None,
'type': 'periapical_classification'},
{ 'bbox': None,
'class_name': '24-25',
'contour': None,
'line': None,
'point': None,
'score': 6.261640010052361e-06,
'tooth': None,
'type': 'periapical_classification'}],
'height': 1621,
'image_hash': None,
'width': 1236}
# Display the grayscale periapical for visual reference.
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(periapical_image, cmap="gray")
ax.axis("off")
Longaxis Endpoint¶
Returns the long-axis coordinates of each tooth found on the periapical image.
Sample Output¶
# Request long-axis points for the teeth visible in the periapical.
longaxis_response = requests.post(
    f"{BASE_URL}/periapicals/longaxis",
    json={"base64_image": encode_image(periapical_image)},
    headers=headers,
)
# Preview the first three points.
pdata = longaxis_response.json()
pdata["entities"] = pdata["entities"][:3]
pp.pprint(pdata)
{ 'entities': [ { 'bbox': None,
'class_name': '43_1',
'contour': None,
'line': None,
'point': [135.1875, 1215.75],
'score': 0.05661439150571823,
'tooth': None,
'type': 'longaxis'},
{ 'bbox': None,
'class_name': '43_0',
'contour': None,
'line': None,
'point': [19.3125, 329.265625],
'score': 0.2382279932498932,
'tooth': None,
'type': 'longaxis'},
{ 'bbox': None,
'class_name': '42_1',
'contour': None,
'line': None,
'point': [299.34375, 1063.78125],
'score': 0.45180168747901917,
'tooth': None,
'type': 'longaxis'}],
'height': 1621,
'image_hash': None,
'width': 1236}
Visualization¶
from vis import draw_longaxis_output

# Overlay the long-axis points; th=0.001 presumably is a score threshold so
# even low-confidence points are drawn — confirm against vis.
dimage = draw_longaxis_output(
    periapical_image,
    longaxis_response.json()["entities"],
    draw_axis=True,
    th=0.001,
)
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(dimage)
ax.axis("off")
Cephalometry¶
Lateral Endpoint¶
Returns anatomical point coordinates in the lateral radiograph.
# Load the lateral radiograph in grayscale and request its anatomical points.
tele_image = Image.open("images/tele0.jpg").convert("L")
cefbot_response = requests.post(
    f"{BASE_URL}/cephalometry/lateral",
    json={"base64_image": encode_image(tele_image)},
    headers=headers,
)
# Preview the first three detected points.
pdata = cefbot_response.json()
pdata["entities"] = pdata["entities"][:3]
pp.pprint(pdata)
{ 'entities': [ { 'bbox': None,
'class_name': 'Nal',
'contour': None,
'line': None,
'model_name': 'cefalometry',
'point': [1399.0, 445.0],
'score': 0.875812828540802,
'tooth': None},
{ 'bbox': None,
'class_name': 'Na',
'contour': None,
'line': None,
'model_name': 'cefalometry',
'point': [1330.0, 445.0],
'score': 0.9012552499771118,
'tooth': None},
{ 'bbox': None,
'class_name': 'Or',
'contour': None,
'line': None,
'model_name': 'cefalometry',
'point': [1268.0, 765.0],
'score': 0.8242092132568359,
'tooth': None}],
'image_hash': None,
'model_name': 'cefalometry',
'output_height': 2304,
'output_width': 2104}
Detected Cephalometric points¶
Nal , Na , Or , SO , RO , Cl , S , Si , Sp , Cls , Po , Cli , Ba , Od , Bo , Op , Co , Bpc , Ar , Bac , C , Dc , Go , Me , Pog , Gn , E , B , Id , M , Pm , D , A , Ena , Pli , Enp , KR , Te , Cm , Ptm , PVT , Vasa , Vasp , Vaia , Vaip , Xi , Bar , D6s , Ams , 6s , Ppd , 6i , Ami , A4 , C4 , PAR , Ap , 3s , 3i , Aii , Iii , Iis , Sf1 , Ais , Pogl , Bl , Li , Stm , Ls , Al , Sn , Prn , Pn , EILI , AA , ADS , AD2 , R , AD1 , ADI , POR , D8 , M8 , D7 , V , T , Tuber , Pi , PTVR , LN , C2 , PPM , C2m , C2pi , C3ps , C3as , C3a , C3m , C3pi , C4ps , C4as , C4ai , C4m , C4pi
Visualization¶
from vis import draw_points

# Overlay every detected cephalometric point on the lateral radiograph.
dimage = draw_points(tele_image, cefbot_response.json()["entities"])
fig, ax = plt.subplots(figsize=(14, 14))
ax.imshow(dimage)
ax.axis("off")
Frontal Endpoint¶
Returns anatomical point coordinates in the frontal radiograph.
# Load the frontal radiograph in grayscale and request its anatomical points.
tele_image = Image.open("images/tele1.jpg").convert("L")
cefbot_response = requests.post(
    f"{BASE_URL}/cephalometry/frontal",
    json={"base64_image": encode_image(tele_image)},
    headers=headers,
)
# Preview the first three detected points.
pdata = cefbot_response.json()
pdata["entities"] = pdata["entities"][:3]
pp.pprint(pdata)
{ 'entities': [ { 'bbox': None,
'class_name': '1',
'contour': None,
'line': None,
'model_name': 'frontal_cefalometry',
'point': [231.0, 232.0],
'score': 0.8913635015487671,
'tooth': None},
{ 'bbox': None,
'class_name': '2',
'contour': None,
'line': None,
'model_name': 'frontal_cefalometry',
'point': [713.0, 210.0],
'score': 0.8802495002746582,
'tooth': None},
{ 'bbox': None,
'class_name': '3',
'contour': None,
'line': None,
'model_name': 'frontal_cefalometry',
'point': [257.0, 241.0],
'score': 0.8347787261009216,
'tooth': None}],
'image_hash': None,
'model_name': 'frontal_cefalometry',
'output_height': 1030,
'output_width': 966}
Detected Cephalometric points¶
Linha Obliqua Esqueda, Linha Obliqua Direita, Ponto ZL, Ponto CLE, Ponto CIE, Ponto CIME, Ponto CSME, Ponto CSE, Ponto CSD, Ponto CSMD, Ponto CIMD, Ponto CID, Ponto CLD, Ponto ZR, Ponto CZE, Ponto ZA, Ponto MME, Ponto PME, Ponto IME, Gonion Esq, Ponto AG, Ponto Me, Ponto GA, Gonion Dir, Ponto PMD, Ponto IMD, Ponto JR, Ponto MMD, Ponto AZ, Ponto CZD, Ponto CN, Ponto ICD, Ponto Ena, Ponto ICE, Ponto NC, Ponto JL, Ponto A6, Ponto Posterior De DOWNS Esq, Ponto B6, Ponto L6, Ponto P6, Ponto B3, Ponto DA1, Ponto DB1, Ponto A1, Ponto B1, Ponto 1BD, Ponto 1AD, Ponto 3B, Ponto 6P, Ponto 6L, Ponto 6B, Ponto Posterior De DOWNS Dir, Ponto 6
Visualization¶
from vis import draw_points

# Overlay every detected cephalometric point on the frontal radiograph.
dimage = draw_points(tele_image, cefbot_response.json()["entities"])
fig, ax = plt.subplots(figsize=(14, 14))
ax.imshow(dimage)
ax.axis("off")
Analysis¶
Face Endpoint¶
Returns anatomical point coordinates in the frontal face image.
# Load the frontal face photo in grayscale and request its anatomical points.
face_image = Image.open("images/face0.jpg").convert("L")
cefbot_response = requests.post(
    f"{BASE_URL}/analysis/face",
    json={"base64_image": encode_image(face_image)},
    headers=headers,
)
# Preview the first three detected points.
pdata = cefbot_response.json()
pdata["entities"] = pdata["entities"][:3]
pp.pprint(pdata)
{ 'entities': [ { 'bbox': None,
'class_name': '1',
'contour': None,
'line': None,
'model_name': 'face',
'point': [525.0, 456.0],
'score': 0.9999104738235474,
'tooth': None},
{ 'bbox': None,
'class_name': '2',
'contour': None,
'line': None,
'model_name': 'face',
'point': [525.0, 719.0],
'score': 0.9895437955856323,
'tooth': None},
{ 'bbox': None,
'class_name': '3',
'contour': None,
'line': None,
'model_name': 'face',
'point': [525.0, 781.0],
'score': 0.9950990080833435,
'tooth': None}],
'image_hash': None,
'model_name': 'face',
'output_height': 1600,
'output_width': 1067}
Detected Face points¶
Trichion, Glabela, Dorso Nasal, Pró-Nasal, Sub-Nasal, Filtrum, Lábio Superior, Incisal Incisiva Superior, Lábio Inferior, Mentoniano Mole, Canto do olho direito, Pupila direita, Zigomático direito, Asa direita do nariz, Comissura labial direita, Canto do olho esquerdo, Pupila esquerda, Zigomático esquerdo, Asa esquerda do nariz, Comissura labial esquerda.
Visualization¶
from vis import draw_points

# Overlay every detected facial landmark on the face image.
dimage = draw_points(face_image, cefbot_response.json()["entities"])
fig, ax = plt.subplots(figsize=(14, 14))
ax.imshow(dimage)
ax.axis("off")